import xarray as xr
import numpy as np
import intake
import ast
import dask_jobqueue
import matplotlib.pyplot as plt
from dask_jobqueue import PBSCluster
Run the cell below if the notebook is running on an NCAR supercomputer.
If the notebook is running on a different parallel computing environment, you will need
to replace the usage of PBSCluster with a similar object from dask_jobqueue or dask_gateway.
# Dask cluster configuration: 20 single-core PBS batch jobs on the 'casper' queue.
num_jobs = 20
walltime = '0:20:00'
memory='10GB'

# NOTE(review): resource_spec hard-codes mem=10GB; keep it in sync with `memory` above.
cluster = PBSCluster(cores=1, processes=1, walltime=walltime, memory=memory, queue='casper',
                     resource_spec='select=1:ncpus=1:mem=10GB',)

# Request num_jobs batch jobs; each provides one single-threaded Dask worker.
cluster.scale(jobs=num_jobs)

from distributed import Client
# Connect a client so subsequent xarray/dask operations run on the cluster.
client = Client(cluster)
# Displaying the cluster object renders the dashboard link in the notebook.
cluster
☝️ A link to the Dask dashboard will appear above.
# Select the cloud storage endpoint for the NA-CORDEX data:
#   True  -> NCAR Cloud Storage (requires an NCAR user account).
#   False -> AWS Cloud Storage (anonymous access).
USE_NCAR_CLOUD_STORAGE = False

if USE_NCAR_CLOUD_STORAGE:
    catalog_url = "https://stratus.ucar.edu/ncar-na-cordex/catalogs/aws-na-cordex.json"
    storage_options = {"anon": True, 'client_kwargs': {"endpoint_url": "https://stratus.ucar.edu/"}}
else:
    catalog_url = "https://ncar-na-cordex.s3-us-west-2.amazonaws.com/catalogs/aws-na-cordex.json"
    storage_options = {"anon": True}
# Have the catalog interpret the "na-cordex-models" column as a list of values, as opposed to single values.
# ast.literal_eval safely parses the stringified Python lists stored in the CSV.
col = intake.open_esm_datastore(catalog_url, read_csv_kwargs={"converters": {"na-cordex-models": ast.literal_eval}},)
col

# Show the first few lines of the catalog
col.df.head(10)
# Produce a catalog content summary: the unique values in the columns most
# useful for building a search query.
uniques = col.unique()
for col_name in ["variable", "scenario", "grid", "na-cordex-models", "bias_correction"]:
    print(f'{col_name}: {uniques[col_name]}\n')
# Variable to analyze throughout the rest of the notebook.
data_var = 'tmax'

# Subset the catalog: evaluation scenario, raw (non-bias-corrected) data on the
# NAM-44i grid.
col_subset = col.search(
    variable=data_var,
    grid="NAM-44i",
    scenario="eval",
    bias_correction="raw",
)

# Display the subset summary, then its underlying dataframe rows.
col_subset
col_subset.df
# Load catalog entries for subset into a dictionary of xarray datasets, and open the first one.
# consolidated=True reads the Zarr stores' consolidated metadata in one request.
dsets = col_subset.to_dataset_dict(
    xarray_open_kwargs={"consolidated": True}, storage_options=storage_options
)
print(f"\nDataset dictionary keys:\n {dsets.keys()}")

# Load the first dataset and display a summary.
dataset_key = list(dsets.keys())[0]
# Store name reused later in plot titles and saved-figure filenames.
store_name = dataset_key + ".zarr"

ds = dsets[dataset_key]
ds

# Note that the summary includes a 'member_id' coordinate, which is a renaming of the
# 'na-cordex-models' column in the catalog.
def plotMap(ax, map_slice, date_object=None, member_id=None):
    """Draw one lat/lon map panel on `ax` and annotate its spatial min/max.

    Optionally titles the panel with the date (first 10 chars of the ISO
    timestamp) and labels the y-axis with the run's member id.
    Returns `ax` for chaining.
    """
    ax.imshow(map_slice, origin='lower')

    # Spatial extrema, printed in the lower corners of the panel.
    low = map_slice.min(dim = ['lat', 'lon'])
    high = map_slice.max(dim = ['lat', 'lon'])
    ax.text(0.01, 0.03, "Min: %3g" % low, transform=ax.transAxes, fontsize=12)
    ax.text(0.99, 0.03, "Max: %3g" % high, transform=ax.transAxes, fontsize=12,
            horizontalalignment='right')

    # Panels are read as thumbnails; hide the tick marks.
    ax.set_xticks([])
    ax.set_yticks([])

    if date_object:
        ax.set_title(date_object.values.astype(str)[:10], fontsize=12)
    if member_id:
        ax.set_ylabel(member_id, fontsize=12)
    return ax
def getValidDateIndexes(member_slice):
    """Return (start_index, end_index) of the first and last time steps with finite data.

    A time step counts as valid when its spatial minimum over (lat, lon) is
    finite, i.e. at least every grid cell is non-NaN at that step.

    Parameters
    ----------
    member_slice : array with a ``.min(dim=['lat', 'lon'])`` reduction
        (an xarray.DataArray with dims including time/lat/lon — TODO confirm).

    Raises
    ------
    ValueError
        If no time step contains finite values (previously this surfaced as
        an opaque IndexError).
    """
    min_values = member_slice.min(dim = ['lat', 'lon'])
    # Indexes of all finite time steps, as a flat integer array.
    finite_indexes = np.flatnonzero(np.isfinite(min_values))
    if finite_indexes.size == 0:
        raise ValueError("No time steps with finite data values were found.")
    return finite_indexes[0], finite_indexes[-1]
def plot_first_mid_last(ds, data_var, store_name):
    """Plot the first, middle, and final time steps for several climate runs.

    One row per ensemble member (up to 4); columns are [first valid date,
    middle date, last valid date].  Returns the matplotlib Figure.
    """
    num_members_to_plot = 4
    member_names = ds.coords['member_id'].values[0:num_members_to_plot]

    figWidth = 18
    figHeight = 12
    numPlotColumns = 3
    fig, axs = plt.subplots(num_members_to_plot, numPlotColumns, figsize=(figWidth, figHeight), constrained_layout=True)

    for index in np.arange(num_members_to_plot):
        mem_id = member_names[index]
        data_slice = ds[data_var].sel(member_id=mem_id)

        # First/last indexes with finite data.  NOTE(review): the middle index
        # is taken from the full time axis, so it is not guaranteed to land on
        # a date with finite values.
        start_index, end_index = getValidDateIndexes(data_slice)
        midDateIndex = np.floor(len(ds.time) / 2).astype(int)

        # Column 0: first valid time step (also labeled with the member id).
        startDate = ds.time[start_index]
        first_step = data_slice.sel(time=startDate)
        ax = axs[index, 0]
        plotMap(ax, first_step, startDate, mem_id)

        # Column 1: middle time step.
        midDate = ds.time[midDateIndex]
        mid_step = data_slice.sel(time=midDate)
        ax = axs[index, 1]
        plotMap(ax, mid_step, midDate)

        # Column 2: last valid time step.
        endDate = ds.time[end_index]
        last_step = data_slice.sel(time=endDate)
        ax = axs[index, 2]
        plotMap(ax, last_step, endDate)

    plt.suptitle(f'First, Middle, and Last Timesteps for Selected Runs in "{store_name}"', fontsize=20)

    return fig
def plot_stat_maps(ds, data_var, store_name):
    """Plot the mean, min, max, and standard deviation values for several climate runs, aggregated over time.

    One row per ensemble member (up to 4); columns are [min, max, mean, std],
    each reduced over the time dimension.  Returns the matplotlib Figure.
    """
    num_members_to_plot = 4
    member_names = ds.coords['member_id'].values[0:num_members_to_plot]

    figWidth = 25
    figHeight = 12
    numPlotColumns = 4
    fig, axs = plt.subplots(num_members_to_plot, numPlotColumns, figsize=(figWidth, figHeight), constrained_layout=True)

    for index in np.arange(num_members_to_plot):
        mem_id = member_names[index]
        data_slice = ds[data_var].sel(member_id=mem_id)

        # One temporal aggregate per column; only column 0 carries the member label.
        data_agg = data_slice.min(dim='time')
        plotMap(axs[index, 0], data_agg, member_id=mem_id)

        data_agg = data_slice.max(dim='time')
        plotMap(axs[index, 1], data_agg)

        data_agg = data_slice.mean(dim='time')
        plotMap(axs[index, 2], data_agg)

        data_agg = data_slice.std(dim='time')
        plotMap(axs[index, 3], data_agg)

    # Column headers go on the top row only.
    axs[0, 0].set_title(f'min({data_var})', fontsize=15)
    axs[0, 1].set_title(f'max({data_var})', fontsize=15)
    axs[0, 2].set_title(f'mean({data_var})', fontsize=15)
    axs[0, 3].set_title(f'std({data_var})', fontsize=15)

    plt.suptitle(f'Spatial Statistics for Selected Runs in "{store_name}"', fontsize=20)

    return fig
The next plot also shows which dates have no available data values, rendered as a rug plot along the bottom of each panel.
def plot_timeseries(ds, data_var, store_name):
    """Plot the mean, min, max, and standard deviation values for several climate runs,
    aggregated over lat/lon dimensions.

    One panel per ensemble member (up to 4).  Each panel shows max/mean/min
    lines, a +/- one-std band around the mean, and a rug of magenta ticks
    marking dates with no finite data.  Returns the matplotlib Figure.
    """
    num_members_to_plot = 4
    member_names = ds.coords['member_id'].values[0:num_members_to_plot]

    figWidth = 25
    figHeight = 20
    linewidth = 0.5
    numPlotColumns = 1
    fig, axs = plt.subplots(num_members_to_plot, numPlotColumns, figsize=(figWidth, figHeight))

    for index in np.arange(num_members_to_plot):
        mem_id = member_names[index]
        data_slice = ds[data_var].sel(member_id=mem_id)
        unit_string = ds[data_var].attrs['units']

        # Spatial statistics as functions of time.
        min_vals = data_slice.min(dim = ['lat', 'lon'])
        max_vals = data_slice.max(dim = ['lat', 'lon'])
        mean_vals = data_slice.mean(dim = ['lat', 'lon'])
        std_vals = data_slice.std(dim = ['lat', 'lon'])

        # .compute() forces eager evaluation so the boolean mask is concrete
        # before it is used to index ds.time.
        missing_indexes = np.isnan(min_vals).compute()
        missing_times = ds.time[missing_indexes]

        axs[index].plot(ds.time, max_vals, linewidth=linewidth, label='max', color='red')
        axs[index].plot(ds.time, mean_vals, linewidth=linewidth, label='mean', color='black')
        axs[index].fill_between(ds.time, (mean_vals - std_vals), (mean_vals + std_vals),
                                color='grey', linewidth=0, label='std', alpha=0.5)
        axs[index].plot(ds.time, min_vals, linewidth=linewidth, label='min', color='blue')

        # Rug plot of missing dates, drawn just above the bottom of the panel.
        ymin, ymax = axs[index].get_ylim()
        rug_y = ymin + 0.01*(ymax-ymin)
        axs[index].plot(missing_times, [rug_y]*len(missing_times), '|', color='m', label='missing')

        axs[index].set_title(mem_id, fontsize=20)
        axs[index].legend(loc='upper right')
        axs[index].set_ylabel(unit_string)

    plt.tight_layout(pad=10.2, w_pad=3.5, h_pad=3.5)
    plt.suptitle(f'Temporal Statistics for Selected Runs in "{store_name}"', fontsize=20)

    return fig
%%time
# Plot using the Zarr Store obtained from an earlier step in the notebook.
# %%time reports how long the (lazy) data loading plus plotting takes.
figure = plot_first_mid_last(ds, data_var, store_name)
plt.show()
Change the value of SAVE_PLOT to True to produce a PNG file of the plot. The file will be saved in the same folder as this notebook.
Then use Jupyter's file browser to locate the file and right-click the file to download it.
# Set SAVE_PLOT = True to write the figure as a PNG next to this notebook.
SAVE_PLOT = False
if SAVE_PLOT:
    plotfile = f'./{dataset_key}_FML.png'
    figure.savefig(plotfile, dpi=100)
%%time
# Plot using the Zarr Store obtained from an earlier step in the notebook.
figure = plot_stat_maps(ds, data_var, store_name)
plt.show()
Change the value of SAVE_PLOT to True to produce a PNG file of the plot. The file will be saved in the same folder as this notebook.
Then use Jupyter's file browser to locate the file and right-click the file to download it.
# Set SAVE_PLOT = True to write the figure as a PNG next to this notebook.
SAVE_PLOT = False
if SAVE_PLOT:
    plotfile = f'./{dataset_key}_MAPS.png'
    figure.savefig(plotfile, dpi=100)
%%time
# Plot using the Zarr Store obtained from an earlier step in the notebook.
figure = plot_timeseries(ds, data_var, store_name)
plt.show()
Change the value of SAVE_PLOT to True to produce a PNG file of the plot. The file will be saved in the same folder as this notebook.
Then use Jupyter's file browser to locate the file and right-click the file to download it.
# Set SAVE_PLOT = True to write the figure as a PNG next to this notebook.
SAVE_PLOT = False
if SAVE_PLOT:
    plotfile = f'./{dataset_key}_TS.png'
    figure.savefig(plotfile, dpi=100)
# Cleanup: print the completion time, release the cluster's batch jobs, and
# record the versions of the imported packages with watermark.
!date
cluster.close()

%load_ext watermark
%watermark -iv